// In this case, that is only a user vs supervisor access check.
//
if ( (rc = hvm_copy_from_guest_virt(val, addr, bytes)) == 0 )
- {
-#if 0
- struct vcpu *v = current;
- SHADOW_PRINTK("d=%u v=%u a=%#lx v=%#lx bytes=%u\n",
- v->domain->domain_id, v->vcpu_id,
- addr, *val, bytes);
-#endif
return X86EMUL_CONTINUE;
- }
/* If we got here, there was nothing mapped here, or a bad GFN
* was mapped here. This should never happen: we're here because
return X86EMUL_PROPAGATE_FAULT;
}
-void shadow_init_emulation(struct sh_emulate_ctxt *sh_ctxt,
- struct cpu_user_regs *regs)
-{
- struct segment_register *creg;
- struct vcpu *v = current;
- unsigned long addr;
-
- sh_ctxt->ctxt.regs = regs;
-
- /* Segment cache initialisation. Primed with CS. */
- sh_ctxt->valid_seg_regs = 0;
- creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
-
- /* Work out the emulation mode. */
- if ( hvm_long_mode_enabled(v) )
- sh_ctxt->ctxt.mode = creg->attr.fields.l ?
- X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32;
- else if ( regs->eflags & X86_EFLAGS_VM )
- sh_ctxt->ctxt.mode = X86EMUL_MODE_REAL;
- else
- sh_ctxt->ctxt.mode = creg->attr.fields.db ?
- X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-
- /* Attempt to prefetch whole instruction. */
- sh_ctxt->insn_buf_bytes =
- (!hvm_translate_linear_addr(
- x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
- hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_copy_from_guest_virt(
- sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
- ? sizeof(sh_ctxt->insn_buf) : 0;
-}
-
static int
-sh_x86_emulate_read(enum x86_segment seg,
- unsigned long offset,
- unsigned long *val,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
+hvm_emulate_read(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
{
return hvm_read(seg, offset, val, bytes, hvm_access_read,
container_of(ctxt, struct sh_emulate_ctxt, ctxt));
}
static int
-sh_x86_emulate_insn_fetch(enum x86_segment seg,
- unsigned long offset,
- unsigned long *val,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
+hvm_emulate_insn_fetch(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
{
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
}
static int
-sh_x86_emulate_write(enum x86_segment seg,
- unsigned long offset,
- unsigned long val,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
+hvm_emulate_write(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
{
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
if ( rc )
return rc;
-#if 0
- SHADOW_PRINTK("d=%u v=%u a=%#lx v=%#lx bytes=%u\n",
- v->domain->domain_id, v->vcpu_id, addr, val, bytes);
-#endif
return v->arch.shadow.mode->x86_emulate_write(
v, addr, &val, bytes, sh_ctxt);
}
static int
-sh_x86_emulate_cmpxchg(enum x86_segment seg,
- unsigned long offset,
- unsigned long old,
- unsigned long new,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
+hvm_emulate_cmpxchg(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long old,
+ unsigned long new,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
{
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
if ( rc )
return rc;
-#if 0
- SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx n:=%#lx bytes=%u\n",
- v->domain->domain_id, v->vcpu_id, addr, old, new, bytes);
-#endif
return v->arch.shadow.mode->x86_emulate_cmpxchg(
v, addr, old, new, bytes, sh_ctxt);
}
static int
-sh_x86_emulate_cmpxchg8b(enum x86_segment seg,
- unsigned long offset,
- unsigned long old_lo,
- unsigned long old_hi,
- unsigned long new_lo,
- unsigned long new_hi,
- struct x86_emulate_ctxt *ctxt)
+hvm_emulate_cmpxchg8b(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long old_lo,
+ unsigned long old_hi,
+ unsigned long new_lo,
+ unsigned long new_hi,
+ struct x86_emulate_ctxt *ctxt)
{
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
if ( rc )
return rc;
-#if 0
- SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx:%lx n:=%#lx:%lx\n",
- v->domain->domain_id, v->vcpu_id, addr, old_hi, old_lo,
- new_hi, new_lo, ctxt);
-#endif
return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
v, addr, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
}
+static struct x86_emulate_ops hvm_shadow_emulator_ops = {
+ .read = hvm_emulate_read,
+ .insn_fetch = hvm_emulate_insn_fetch,
+ .write = hvm_emulate_write,
+ .cmpxchg = hvm_emulate_cmpxchg,
+ .cmpxchg8b = hvm_emulate_cmpxchg8b,
+};
+
+static int
+pv_emulate_read(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ unsigned int rc;
+
+ *val = 0;
+ if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
+ {
+ propagate_page_fault(offset + bytes - rc, 0); /* read fault */
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ return X86EMUL_CONTINUE;
+}
+
+static int
+pv_emulate_write(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct sh_emulate_ctxt *sh_ctxt =
+ container_of(ctxt, struct sh_emulate_ctxt, ctxt);
+ struct vcpu *v = current;
+ return v->arch.shadow.mode->x86_emulate_write(
+ v, offset, &val, bytes, sh_ctxt);
+}
+
+static int
+pv_emulate_cmpxchg(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long old,
+ unsigned long new,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct sh_emulate_ctxt *sh_ctxt =
+ container_of(ctxt, struct sh_emulate_ctxt, ctxt);
+ struct vcpu *v = current;
+ return v->arch.shadow.mode->x86_emulate_cmpxchg(
+ v, offset, old, new, bytes, sh_ctxt);
+}
+
+static int
+pv_emulate_cmpxchg8b(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long old_lo,
+ unsigned long old_hi,
+ unsigned long new_lo,
+ unsigned long new_hi,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct sh_emulate_ctxt *sh_ctxt =
+ container_of(ctxt, struct sh_emulate_ctxt, ctxt);
+ struct vcpu *v = current;
+ return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
+ v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
+}
-struct x86_emulate_ops shadow_emulator_ops = {
- .read = sh_x86_emulate_read,
- .insn_fetch = sh_x86_emulate_insn_fetch,
- .write = sh_x86_emulate_write,
- .cmpxchg = sh_x86_emulate_cmpxchg,
- .cmpxchg8b = sh_x86_emulate_cmpxchg8b,
+static struct x86_emulate_ops pv_shadow_emulator_ops = {
+ .read = pv_emulate_read,
+ .insn_fetch = pv_emulate_read,
+ .write = pv_emulate_write,
+ .cmpxchg = pv_emulate_cmpxchg,
+ .cmpxchg8b = pv_emulate_cmpxchg8b,
};
+struct x86_emulate_ops *shadow_init_emulation(
+ struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
+{
+ struct segment_register *creg;
+ struct vcpu *v = current;
+ unsigned long addr;
+
+ sh_ctxt->ctxt.regs = regs;
+
+ if ( !is_hvm_vcpu(v) )
+ {
+ sh_ctxt->ctxt.mode = X86EMUL_MODE_HOST;
+ return &pv_shadow_emulator_ops;
+ }
+
+ /* Segment cache initialisation. Primed with CS. */
+ sh_ctxt->valid_seg_regs = 0;
+ creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
+
+ /* Work out the emulation mode. */
+ if ( hvm_long_mode_enabled(v) )
+ sh_ctxt->ctxt.mode = creg->attr.fields.l ?
+ X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32;
+ else if ( regs->eflags & X86_EFLAGS_VM )
+ sh_ctxt->ctxt.mode = X86EMUL_MODE_REAL;
+ else
+ sh_ctxt->ctxt.mode = creg->attr.fields.db ?
+ X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+
+ /* Attempt to prefetch whole instruction. */
+ sh_ctxt->insn_buf_bytes =
+ (!hvm_translate_linear_addr(
+ x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+ hvm_access_insn_fetch, sh_ctxt, &addr) &&
+ !hvm_copy_from_guest_virt(
+ sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+ ? sizeof(sh_ctxt->insn_buf) : 0;
+
+ return &hvm_shadow_emulator_ops;
+}
+
/**************************************************************************/
/* Code for "promoting" a guest page to the point where the shadow code is
* willing to let it be treated as a guest page table. This generally
#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_to_user_hvm(_x+(off), _y, sizeof(*_x)*(nr)) : \
copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
})
#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_from_user_hvm(_y, _x+(off), sizeof(*_x)*(nr)) :\
copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
})
#define copy_field_to_guest(hnd, ptr, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_to_user_hvm(_x, _y, sizeof(*_x)) : \
copy_to_user(_x, _y, sizeof(*_x)); \
})
#define copy_field_from_guest(ptr, hnd, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_from_user_hvm(_y, _x, sizeof(*_x)) : \
copy_from_user(_y, _x, sizeof(*_x)); \
})
#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_to_user_hvm(_x+(off), _y, sizeof(*_x)*(nr)) : \
__copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
})
#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
const typeof(ptr) _x = (hnd).p; \
const typeof(ptr) _y = (ptr); \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_from_user_hvm(_y, _x+(off),sizeof(*_x)*(nr)) : \
__copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
})
#define __copy_field_to_guest(hnd, ptr, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
copy_to_user_hvm(_x, _y, sizeof(*_x)) : \
__copy_to_user(_x, _y, sizeof(*_x)); \
})
#define __copy_field_from_guest(ptr, hnd, field) ({ \
const typeof(&(ptr)->field) _x = &(hnd).p->field; \
const typeof(&(ptr)->field) _y = &(ptr)->field; \
- shadow_mode_translate(current->domain) ? \
+ is_hvm_vcpu(current) ? \
        copy_from_user_hvm(_y, _x, sizeof(*_x)) :          \
__copy_from_user(_y, _x, sizeof(*_x)); \
})